/* linux-headers-2.6.28-15: arch/x86/include/asm/dma-mapping.h */

#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

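/*
 * Per-implementation DMA operations.  Each x86 backend (e.g. the
 * no-IOMMU code, swiotlb, or a hardware IOMMU driver) provides one of
 * these tables; the inline wrappers below simply dispatch through it.
 */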
struct dma_mapping_ops {
    int             (*mapping_error)(struct device *dev,
                        dma_addr_t dma_addr);
    void *          (*alloc_coherent)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp);
    void            (*free_coherent)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle);
    dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                        size_t size, int direction);
    void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                        size_t size, int direction);
    void            (*sync_single_for_cpu)(struct device *hwdev,
                        dma_addr_t dma_handle, size_t size,
                        int direction);
    void            (*sync_single_for_device)(struct device *hwdev,
                        dma_addr_t dma_handle, size_t size,
                        int direction);
    void            (*sync_single_range_for_cpu)(struct device *hwdev,
                        dma_addr_t dma_handle, unsigned long offset,
                        size_t size, int direction);
    void            (*sync_single_range_for_device)(struct device *hwdev,
                        dma_addr_t dma_handle, unsigned long offset,
                        size_t size, int direction);
    void            (*sync_sg_for_cpu)(struct device *hwdev,
                        struct scatterlist *sg, int nelems,
                        int direction);
    void            (*sync_sg_for_device)(struct device *hwdev,
                        struct scatterlist *sg, int nelems,
                        int direction);
    int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                        int nents, int direction);
    void            (*unmap_sg)(struct device *hwdev,
                        struct scatterlist *sg, int nents,
                        int direction);
    int             (*dma_supported)(struct device *hwdev, u64 mask);
    int             is_phys;
};

extern struct dma_mapping_ops *dma_ops;

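/*
 * On 32-bit x86 there is a single, global dma_ops table.  On 64-bit a
 * device may carry its own table in dev->archdata.dma_ops (set up by the
 * IOMMU code); fall back to the global table when none is set.
 */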
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
    return dma_ops;
#else
    if (unlikely(!dev) || !dev->archdata.dma_ops)
        return dma_ops;
    else
        return dev->archdata.dma_ops;
#endif /* CONFIG_X86_32 */
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_64
    struct dma_mapping_ops *ops = get_dma_ops(dev);
    if (ops->mapping_error)
        return ops->mapping_error(dev, dma_addr);

#endif
    return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)    (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

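/*
 * Streaming mappings of a single, virtually contiguous buffer.  A minimal
 * driver-side sketch (illustrative only; "dev", "buf" and "len" stand in
 * for whatever the caller actually has):
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... point the device at 'handle', wait for the transfer ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */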
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->unmap_single)
        ops->unmap_single(dev, addr, size, direction);
}

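/*
 * Streaming mappings of a scatter/gather list.  A hedged sketch of typical
 * use; "dev", "pages", NENTS and hw_add_buffer() are hypothetical stand-ins.
 * Entries may be coalesced, so drive the device from the returned count but
 * unmap with the original nents:
 *
 *      struct scatterlist sgl[NENTS], *sg;
 *      int i, count;
 *
 *      sg_init_table(sgl, NENTS);
 *      for (i = 0; i < NENTS; i++)
 *              sg_set_page(&sgl[i], pages[i], PAGE_SIZE, 0);
 *
 *      count = dma_map_sg(dev, sgl, NENTS, DMA_FROM_DEVICE);
 *      if (count == 0)
 *              return -ENOMEM;
 *      for_each_sg(sgl, sg, count, i)
 *              hw_add_buffer(sg_dma_address(sg), sg_dma_len(sg));
 *      ...
 *      dma_unmap_sg(dev, sgl, NENTS, DMA_FROM_DEVICE);
 */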
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->unmap_sg)
        ops->unmap_sg(hwdev, sg, nents, direction);
}

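/*
 * Partial-ownership transfers for streaming mappings: the sync_*_for_cpu
 * helpers hand a mapped buffer back to the CPU so it may be inspected or
 * modified; the matching sync_*_for_device helpers return it to the device
 * before the next transfer.  On x86 these are often no-ops beyond
 * flush_write_buffers(), but backends that bounce-buffer (e.g. swiotlb)
 * rely on them to copy data to and from the bounce buffer.
 */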
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->sync_single_for_cpu)
        ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
    flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->sync_single_for_device)
        ops->sync_single_for_device(hwdev, dma_handle, size, direction);
    flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->sync_single_range_for_cpu)
        ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                       size, direction);
    flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->sync_single_range_for_device)
        ops->sync_single_range_for_device(hwdev, dma_handle,
                                          offset, size, direction);
    flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->sync_sg_for_cpu)
        ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
    flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(hwdev);

    BUG_ON(!valid_dma_direction(direction));
    if (ops->sync_sg_for_device)
        ops->sync_sg_for_device(hwdev, sg, nelems, direction);

    flush_write_buffers();
}

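/*
 * Page-based mappings are implemented on top of the same map_single hook;
 * unmapping a page is identical to unmapping a single buffer.
 */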
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
    struct dma_mapping_ops *ops = get_dma_ops(dev);

    BUG_ON(!valid_dma_direction(direction));
    return ops->map_single(dev, page_to_phys(page) + offset,
                           size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
    dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
    flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
    /* no easy way to get cache size on all x86, so return the
     * maximum possible, to be safe */
    return boot_cpu_data.x86_clflush_size;
}

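/*
 * Helpers for dma_alloc_coherent(): derive the coherent DMA mask to honour
 * (falling back to 24 bits for GFP_DMA callers, or 32 bits otherwise, when
 * the device has not set one) and translate that mask into GFP zone flags.
 */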
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
    unsigned long dma_mask = 0;

    dma_mask = dev->coherent_dma_mask;
    if (!dma_mask)
        dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

    return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
    unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

    if (dma_mask <= DMA_24BIT_MASK)
        gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
    if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
        gfp |= GFP_DMA32;
#endif
    return gfp;
}

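/*
 * Coherent (consistent) allocations.  A hedged usage sketch, assuming a
 * driver that needs a small descriptor ring ("dev", "ring" and RING_BYTES
 * are hypothetical):
 *
 *      dma_addr_t ring_dma;
 *      void *ring;
 *
 *      ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */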
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
    struct dma_mapping_ops *ops = get_dma_ops(dev);
    void *memory;

    gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

    if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
        return memory;

    if (!dev) {
        dev = &x86_dma_fallback_dev;
        gfp |= GFP_DMA;
    }

    if (!is_device_dma_capable(dev))
        return NULL;

    if (!ops->alloc_coherent)
        return NULL;

    return ops->alloc_coherent(dev, size, dma_handle,
                               dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
    struct dma_mapping_ops *ops = get_dma_ops(dev);

    WARN_ON(irqs_disabled());       /* for portability */

    if (dma_release_from_coherent(dev, get_order(size), vaddr))
        return;

    if (ops->free_coherent)
        ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */